<?xml version="1.0" encoding="utf-8" ?>
<rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:syn="http://purl.org/rss/1.0/modules/syndication/" xmlns="http://purl.org/rss/1.0/">




    



<channel rdf:about="https://cis-india.org/search_rss">
  <title>Centre for Internet and Society</title>
  <link>https://cis-india.org</link>
  
  <description>
    
            These are the search results for the query, showing results 41 to 55.
        
  </description>
  
  
  
  
  <image rdf:resource="https://cis-india.org/logo.png"/>

  <items>
    <rdf:Seq>
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/news/roundtable-on-consumer-experiences-with-new-technologies-in-apac-singapore"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/news/international-conference-on-justice-education-legal-implications-of-artificial-intelligence"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/news/artificial-intelligence-for-indias-transformation"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/news/owasp-seasides-conference"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/blog/unbox-2019-festival"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/news/unescap-and-google-ai-december-13-bangkok-ai-for-social-good-summit"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/news/future-tech-and-future-law"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/news/hindustan-times-november-28-2018-kul-bhushan-amazon-launches-machine-learning-based-platform-for-healthcare-space"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/news/speculative-futures-lab-on-artificial-intelligence-in-media-entertainment-and-gaming"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/news/participation-in-the-meetings-of-iso-iec-jtc-1-sc-27-it-security-techniques"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/news/technology-foresight-group-tandem-researchs-ai-policy-lab-on-the-theme-ai-and-environment"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/news/confidentiality-of-communications-and-privacy-of-data-in-the-digital-age"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/blog/oxford-human-rights-hub-arindrajit-basu-october-23-2018-discrimination-in-the-age-of-artificial-intelligence"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/blog/the-srikrishna-committee-data-protection-bill-and-artificial-intelligence-in-india"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/news/unescap-google-ai-meeting"/>
        
    </rdf:Seq>
  </items>

</channel>


    <item rdf:about="https://cis-india.org/internet-governance/news/roundtable-on-consumer-experiences-with-new-technologies-in-apac-singapore">
    <title>Roundtable on Consumer Experiences with New Technologies in APAC (Singapore)</title>
    <link>https://cis-india.org/internet-governance/news/roundtable-on-consumer-experiences-with-new-technologies-in-apac-singapore</link>
    <description>
        &lt;b&gt;Arindrajit Basu was invited to a Roundtable on Artificial Intelligence: Consumer Experiences with New Technologies (APAC region). &lt;/b&gt;
        &lt;p style="text-align: justify; "&gt;The event &lt;span&gt;was hosted by Consumer International and delivered at Google, Singapore on March 26, 2019. CIS research and Arindrajit's inputs have been quoted in a report by the same name which will be released by Consumer International within the course of the next month.&lt;/span&gt;&lt;/p&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/news/roundtable-on-consumer-experiences-with-new-technologies-in-apac-singapore'&gt;https://cis-india.org/internet-governance/news/roundtable-on-consumer-experiences-with-new-technologies-in-apac-singapore&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Admin</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Internet Governance</dc:subject>
    
    
        <dc:subject>Artificial Intelligence</dc:subject>
    

   <dc:date>2019-04-15T10:25:57Z</dc:date>
   <dc:type>News Item</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/news/international-conference-on-justice-education-legal-implications-of-artificial-intelligence">
    <title>International Conference on Justice Education: Legal Implications of Artificial Intelligence</title>
    <link>https://cis-india.org/internet-governance/news/international-conference-on-justice-education-legal-implications-of-artificial-intelligence</link>
    <description>
        &lt;b&gt;Arindrajit Basu attended the International Conference on Justice Education with the theme "Artificial Intelligence and its Legal Implications" at Institute of Law Nirma University. The event was organized by Nirma University in Ahmedabad on March 15 - 16, 2019. Arindrajit was a theme speaker for the panel on Legal Implications of Artificial Intelligence and was a judge of the presentations in the same session.&lt;/b&gt;
        &lt;p&gt;Click to &lt;a class="external-link" href="http://cis-india.org/internet-governance/files/icje-conference-schedule"&gt;read the agenda&lt;/a&gt;&lt;/p&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/news/international-conference-on-justice-education-legal-implications-of-artificial-intelligence'&gt;https://cis-india.org/internet-governance/news/international-conference-on-justice-education-legal-implications-of-artificial-intelligence&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Admin</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Internet Governance</dc:subject>
    
    
        <dc:subject>Artificial Intelligence</dc:subject>
    

   <dc:date>2019-03-20T15:52:29Z</dc:date>
   <dc:type>News Item</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/news/artificial-intelligence-for-indias-transformation">
    <title>Artificial Intelligence for India's Transformation</title>
    <link>https://cis-india.org/internet-governance/news/artificial-intelligence-for-indias-transformation</link>
    <description>
        &lt;b&gt;ASSOCHAM's 3rd International Conference was organized at Hotel Imperial in New Delhi. Amber Sinha led a session on use, impact and ethics in AI. &lt;/b&gt;
        &lt;p&gt;Click to &lt;a class="external-link" href="http://cis-india.org/internet-governance/files/ai-in-ethics-agenda/view"&gt;view the agenda&lt;/a&gt;.&lt;/p&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/news/artificial-intelligence-for-indias-transformation'&gt;https://cis-india.org/internet-governance/news/artificial-intelligence-for-indias-transformation&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Admin</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Internet Governance</dc:subject>
    
    
        <dc:subject>Artificial Intelligence</dc:subject>
    

   <dc:date>2019-03-20T01:38:48Z</dc:date>
   <dc:type>News Item</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/news/owasp-seasides-conference">
    <title>OWASP Seasides Conference</title>
    <link>https://cis-india.org/internet-governance/news/owasp-seasides-conference</link>
    <description>
        &lt;b&gt;Karan Saini attended the OWASP Seasides security conference held on February 27 and 28, 2019 at Cavelossim, Goa. The event was organized by OWASP Seasides.&lt;/b&gt;
        &lt;p&gt;For conference details &lt;a class="external-link" href="https://www.owaspseasides.com/schedule/workshops"&gt;click here&lt;/a&gt;.&lt;/p&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/news/owasp-seasides-conference'&gt;https://cis-india.org/internet-governance/news/owasp-seasides-conference&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Admin</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Internet Governance</dc:subject>
    
    
        <dc:subject>Artificial Intelligence</dc:subject>
    

   <dc:date>2019-03-07T23:53:47Z</dc:date>
   <dc:type>News Item</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/blog/unbox-2019-festival">
    <title>Unbox Festival 2019: CIS organizes two Workshops</title>
    <link>https://cis-india.org/internet-governance/blog/unbox-2019-festival</link>
    <description>
        &lt;b&gt;Centre for Internet &amp; Society organized two workshops at the Unbox Festival 2019, in Bangalore, on 15 and 17 February 2019. &lt;/b&gt;
        &lt;h3 style="text-align: justify; "&gt;'What is your Feminist Infrastructure Wishlist?&lt;/h3&gt;
&lt;p style="text-align: justify; "&gt;The first workshop 'What is your Feminist Infrastructure Wishlist?' was on Feminist Infrastructure Wishlists that was conducted by P.P. Sneha and Saumyaa Naidu on  15 February 2019. The objective of the workshop was to explore what it means to have infrastructure that is feminist. How do we build spaces, networks, and systems that are equal, inclusive, diverse, and accessible? We will also reflect on questions of network configurations, expertise, labour and visibility. For reading material &lt;a class="external-link" href="https://feministinternet.org/"&gt;click here&lt;/a&gt;.&lt;/p&gt;
&lt;h3 style="text-align: justify; "&gt;AI for Good&lt;/h3&gt;
&lt;p style="text-align: justify; "&gt;With a backdrop of AI for social good, we explore existing applications of artificial intelligence, how we interact and engage with this technology on a daily basis. A discussion led by Saumyaa Naidu and Shweta Mohandas invited participants to examine current narratives around AI and imagine how these may transform with time. Questions around how we can build an AI for the future will become the starting point to trace its implications relating to social impact, policy, gender, design, and privacy. For reading materials see &lt;a class="external-link" href="https://ainowinstitute.org/AI_Now_2018_Report.pdf"&gt;AI Now Report 2018&lt;/a&gt;, &lt;a class="external-link" href="https://www.propublica.org/article/machine-bias-risk-assessments-in-criminal-sentencing"&gt;Machine Bias&lt;/a&gt;, and &lt;a class="external-link" href="https://www.theatlantic.com/technology/archive/2016/03/why-do-so-many-digital-assistants-have-feminine-names/475884/"&gt;Why Do So Many Digital Assistants Have Feminine Names?&lt;/a&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;For info on Unbox Festival, &lt;a class="external-link" href="http://unboxfestival.com/"&gt;click here&lt;/a&gt;&lt;/p&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/blog/unbox-2019-festival'&gt;https://cis-india.org/internet-governance/blog/unbox-2019-festival&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>saumyaa</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Gender</dc:subject>
    
    
        <dc:subject>Internet Governance</dc:subject>
    
    
        <dc:subject>Artificial Intelligence</dc:subject>
    

   <dc:date>2019-02-26T01:53:39Z</dc:date>
   <dc:type>Blog Entry</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/news/unescap-and-google-ai-december-13-bangkok-ai-for-social-good-summit">
    <title>AI for Social Good Summit</title>
    <link>https://cis-india.org/internet-governance/news/unescap-and-google-ai-december-13-bangkok-ai-for-social-good-summit</link>
    <description>
        &lt;b&gt;Arindrajit Basu was a speaker at the event co-organized by Google AI and United Nations ESCAP on December 13, 2018 in Bangkok, Thailand.&lt;/b&gt;
        &lt;p class="moz-quote-pre" style="text-align: justify; "&gt;Arindrajit spoke at the panel " How can governments use AI in Public Service Delivery" along with Malavika Jayaram, Jake Lucci,Punit Shukla,Simon Schmooly and Gal Oren. He presented CIS research on AI in agriculture in Karnataka-which will be published as part of a compendium documenting case studies worldwide soon.&lt;/p&gt;
&lt;p class="moz-quote-pre" style="text-align: justify; "&gt;&lt;a class="external-link" href="http://cis-india.org/internet-governance/files/ai-for-social-good-summit"&gt;Click to read more&lt;/a&gt;&lt;/p&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/news/unescap-and-google-ai-december-13-bangkok-ai-for-social-good-summit'&gt;https://cis-india.org/internet-governance/news/unescap-and-google-ai-december-13-bangkok-ai-for-social-good-summit&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Admin</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Internet Governance</dc:subject>
    
    
        <dc:subject>Artificial Intelligence</dc:subject>
    

   <dc:date>2018-12-25T01:02:01Z</dc:date>
   <dc:type>News Item</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/news/future-tech-and-future-law">
    <title>Future Tech and Future Law</title>
    <link>https://cis-india.org/internet-governance/news/future-tech-and-future-law</link>
    <description>
        &lt;b&gt;The Dept. of IT &amp; BT, Government of Karnataka organised the 21st edition of Bengaluru Tech Summit from November 29, 2018 to December 1, 2018 at Palace Grounds, Bengaluru. Arindrajit Basu was a speaker at the panel on 'Future Tech and Future Law'.&lt;/b&gt;
        &lt;p class="moz-quote-pre" style="text-align: justify; "&gt;The discussion was moderated by Tanvi Ratna. Aayush's co-panelists were Apar Gupta,Jaideep Reddy and Nilesh Trivedi. During his remarks, he attempted to focus  on our AI research thus far and our suggestions for AI regulation.&lt;/p&gt;
&lt;p class="moz-quote-pre" style="text-align: justify; "&gt;For more details &lt;a class="external-link" href="https://www.bengalurutechsummit.com/"&gt;see this page&lt;/a&gt;.&lt;/p&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/news/future-tech-and-future-law'&gt;https://cis-india.org/internet-governance/news/future-tech-and-future-law&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Admin</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Internet Governance</dc:subject>
    
    
        <dc:subject>Artificial Intelligence</dc:subject>
    

   <dc:date>2019-01-03T01:17:29Z</dc:date>
   <dc:type>News Item</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/news/hindustan-times-november-28-2018-kul-bhushan-amazon-launches-machine-learning-based-platform-for-healthcare-space">
    <title>Amazon launches Machine Learning-based platform for healthcare space</title>
    <link>https://cis-india.org/internet-governance/news/hindustan-times-november-28-2018-kul-bhushan-amazon-launches-machine-learning-based-platform-for-healthcare-space</link>
    <description>
        &lt;b&gt;Amazon’s Comprehend Medical platform uses a new HIPAA-eligible machine learning service to process unstructured medical text and information such as dosages, symptoms and signs, and patient diagnosis.&lt;/b&gt;
        &lt;p style="text-align: justify; "&gt;The article by Kul Bhushan was published in the &lt;a class="external-link" href="https://www.hindustantimes.com/tech/nov-28-amazon-launches-machine-learning-driven-platform-for-healthcare-space/story-3EuXjDiVO8NLBxjOMKkopO.html"&gt;Hindustan Times&lt;/a&gt; on November 28, 2018.&lt;/p&gt;
&lt;hr style="text-align: justify; " /&gt;
&lt;p style="text-align: justify; "&gt;With an objective to push deeper into the health space, Amazon has introduced a new &lt;a href="https://www.hindustantimes.com/topic/machine-learning"&gt;Machine Learning&lt;/a&gt; (ML) software to analyse medical records for better treatments of patients and reduce overall expenditure.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Unveiled  at the company’s re:Invent cloud conference in Las Vegas, Amazon’s  Comprehend Medical platform uses a new “HIPAA-eligible machine learning  service that allows developers to process unstructured medical text and  identify information such as patient diagnosis, treatments, dosages,  symptoms and signs, and more.”&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;“Comprehend Medical helps health  care providers, insurers, researchers, and clinical trial investigators  as well as health care IT, biotech, and pharmaceutical companies to  improve clinical decision support, streamline revenue cycle and clinical  trials management, and better address data privacy and protected health  information (PHI) requirements,” explains the company on its &lt;a href="https://aws.amazon.com/blogs/machine-learning/introducing-medical-language-processing-with-amazon-comprehend-medical/" rel="nofollow"&gt;website&lt;/a&gt;.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Amazon  aims to mitigate the time spent on manually analysing medical data of a  patient. The company hopes the software will ultimately empower users  to make a more informed decision about their health and even things like  scheduling care visits.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;“Unlocking this information from medical language makes a variety of  common medical use cases easier and cost-effective, including: clinical  decision support (e.g., getting a historical snapshot of a patient’s  medical history), revenue cycle management (e.g., simplifying the  time-intensive manual process of data entry), clinical trial management  (e.g., by identifying and recruiting patients with certain attributes  into clinical trials), building population health platforms, and helping  address (PHI) requirements (e.g., for privacy and security  assurance.),” the company added.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Amazon also pointed out that some  of the medical institutes such as Seattle’s Fred Hutchinson Cancer  Research Center and Roche Diagnostics have already implemented the  software.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Amazon’s expansion into the healthcare space comes after it acquired  health-focused startup PillPack for $1 billion earlier this year. Apart  from Amazon, other technology companies like Apple and Microsoft are  investing into the healthcare space.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Apple is already offering  HealthKit and CareKit platforms to develop apps focused on health. The  company earlier this year launched &lt;a href="https://www.hindustantimes.com/tech/apple-watch-series-4-launched-with-ecg-compatibility-new-design/story-2LqdNq7YjAXGU3HEH5om8N.html"&gt;Apple Watch Series 4 with ECG support&lt;/a&gt;.  Microsoft, however, has deeper footprints in the health segment. The  company is building a bunch of Artificial Intelligence-based tools for  healthcare.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;For instance, Microsoft’s Project InnerEye uses machine learning  technology to build tools for automatic, quantitative analysis of  three-dimensional radiological images.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;According to various  reports, Artificial Intelligence is going to make a big impact in the  healthcare industry. An Accenture report in 2017 &lt;a href="https://www.accenture.com/t20171215T032059Z__w__/us-en/_acnmedia/PDF-49/Accenture-Health-Artificial-Intelligence.pdf" rel="nofollow" target="_blank"&gt;predicted&lt;/a&gt; that the AI apps can create $150 billion in annual savings for the United States alone.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Back in India, the adoption of AI in healthcare is growing. According  to a report by the Centre for Internet and Society India, “the use of  AI in healthcare in India is increasing with new startups and large ICT  companies offering AI solutions for healthcare challenges in the  country.”&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Bengalure-based startup mfine has developed an AI-based  healthcare platform which learns medical standards and protocols and  diagnosis and treatment methods to further help the doctors with  necessary data and analysis. The company earlier this year raised $4.2  million in funding.&lt;/p&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/news/hindustan-times-november-28-2018-kul-bhushan-amazon-launches-machine-learning-based-platform-for-healthcare-space'&gt;https://cis-india.org/internet-governance/news/hindustan-times-november-28-2018-kul-bhushan-amazon-launches-machine-learning-based-platform-for-healthcare-space&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Admin</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Internet Governance</dc:subject>
    
    
        <dc:subject>Artificial Intelligence</dc:subject>
    

   <dc:date>2018-12-03T00:23:06Z</dc:date>
   <dc:type>News Item</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/news/speculative-futures-lab-on-artificial-intelligence-in-media-entertainment-and-gaming">
    <title>Speculative Futures Lab on Artificial Intelligence in Media, Entertainment, and Gaming</title>
    <link>https://cis-india.org/internet-governance/news/speculative-futures-lab-on-artificial-intelligence-in-media-entertainment-and-gaming</link>
    <description>
        &lt;b&gt;Pranav Manjesh Bidare attended the event organised by Quicksand between November 16 and 18, 2018 in Bangalore as a panelist.&lt;/b&gt;
        &lt;p style="text-align: justify; "&gt;Pranav was a panelist in the session discussing "Ethics of AI in the Creative  spaces" on November 17, alongside Urvashi Aneja, and Abishek Reddy from  Tandem Research. For more info &lt;a class="external-link" href="http://cis-india.org/internet-governance/files/Quicksand%20AI%20Futures%20Lab.pdf"&gt;see this&lt;/a&gt;.&lt;/p&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/news/speculative-futures-lab-on-artificial-intelligence-in-media-entertainment-and-gaming'&gt;https://cis-india.org/internet-governance/news/speculative-futures-lab-on-artificial-intelligence-in-media-entertainment-and-gaming&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Admin</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Internet Governance</dc:subject>
    
    
        <dc:subject>Artificial Intelligence</dc:subject>
    

   <dc:date>2018-12-05T03:12:58Z</dc:date>
   <dc:type>News Item</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/news/participation-in-the-meetings-of-iso-iec-jtc-1-sc-27-it-security-techniques">
    <title>Participation in the meetings of ISO/IEC JTC 1/SC 27 'IT Security techniques'</title>
    <link>https://cis-india.org/internet-governance/news/participation-in-the-meetings-of-iso-iec-jtc-1-sc-27-it-security-techniques</link>
    <description>
        &lt;b&gt;From 30 September 2018 to 4 October 2018, Gurshabad Grover participated in the meetings of the working groups of ISO/IEC JTC 1/SC 27 'IT Security techniques' held in Gjøvik, Norway. The meetings were organized by Standards Norway with support from NTNU, Microsoft, Telenor, et al.&lt;/b&gt;
        &lt;p&gt;Gurshabad mainly focused on the meetings of Working Group 5 responsible for standards and research in "Identity management and privacy technologies" in SC 27. He attended sessions discussing work related to current ISO/IEC standards and upcoming work in the WG, such as:&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;Establishing a PII deletion concept in organizations&lt;/li&gt;
&lt;/ul&gt;
&lt;ul&gt;
&lt;li&gt;Privacy guidelines for smart cities&lt;/li&gt;
&lt;/ul&gt;
&lt;ul&gt;
&lt;li&gt;Additional privacy-enhancing data de-identification standards&lt;/li&gt;
&lt;/ul&gt;
&lt;ul&gt;
&lt;li&gt;Extension to ISO/IEC 27001 and ISO/IEC 27002 for privacy information management&lt;/li&gt;
&lt;/ul&gt;
&lt;ul&gt;
&lt;li&gt;User-centric framework for PII handling based on user privacy preferences&lt;/li&gt;
&lt;/ul&gt;
&lt;p&gt;&lt;br /&gt;Gurshabad will be a co-rapporteur on a 12-month study period to investigate the 'Impact of Artificial Intelligence on Privacy' which was initiated by the WG in the meeting. Additionally, he was a part of the drafting committee which prepared the final resolutions and liaison statements from the meeting.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Gurshabad also attended the Norwegian Business Forum on cyber security which was held on October 4th, which featured talks by professionals and academicians working in cyber security in their different sectors. The agenda for the business forum can be &lt;a class="external-link" href="http://www.standard.no/en/kurs-og-arrangementer/arrangement-standard-norge-og-nek/arrangement-fra-standard-norge/business-forum---cyber-security/"&gt;found here&lt;/a&gt;.&lt;/p&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/news/participation-in-the-meetings-of-iso-iec-jtc-1-sc-27-it-security-techniques'&gt;https://cis-india.org/internet-governance/news/participation-in-the-meetings-of-iso-iec-jtc-1-sc-27-it-security-techniques&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Admin</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Internet Governance</dc:subject>
    
    
        <dc:subject>Artificial Intelligence</dc:subject>
    
    
        <dc:subject>Privacy</dc:subject>
    

   <dc:date>2018-10-31T01:28:29Z</dc:date>
   <dc:type>News Item</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/news/technology-foresight-group-tandem-researchs-ai-policy-lab-on-the-theme-ai-and-environment">
    <title>Technology Foresight Group Tandem Research's AI policy lab on the theme AI and Environment</title>
    <link>https://cis-india.org/internet-governance/news/technology-foresight-group-tandem-researchs-ai-policy-lab-on-the-theme-ai-and-environment</link>
    <description>
        &lt;b&gt;Shweta Mohandas attended a roundtable discussion on artificial intelligence and environment held at Tandem Research's office in Goa on October 5, 2018. She also made the  framing intervention for the first session by addressing the question - What are the likely ethical conundrums, and plausible unintended consequences of the use of AI for sustainability?&lt;/b&gt;
        &lt;dl style="text-align: justify; "&gt;
&lt;p&gt;Conversations at the lab clustered around four main themes:&lt;/p&gt;
&lt;p&gt;&lt;b&gt;AI in the Anthropocene&lt;/b&gt;&lt;br /&gt;What are the most critical sustainability challenges in India – and can AI be useful in addressing them? What are the likely ethical conundrums, and plausible unintended consequences of the use of AI for sustainability?&lt;br /&gt;&lt;br /&gt;&lt;b&gt;Conservation after nature&lt;/b&gt;&lt;br /&gt;What AI interventions are possible to foster better conservation and can AI driven citizen science initiatives improve people’s relationship with the natural world? Can AI help imagine a more dynamic and proximate co-existence with other species, after nature?&lt;br /&gt;&lt;br /&gt;&lt;b&gt;Water ecosystems&lt;/b&gt;&lt;br /&gt;Can AI help us imagine new paradigm of water control and infrastructure that are more dynamic and ‘mirror’ the complexity of natural water systems? Will AI lead to decentralization and empowerment of water users or will it result in centralized models and loss of power and agency of water users?&lt;br /&gt;&lt;br /&gt;&lt;b&gt;Future Cities&lt;/b&gt;&lt;br /&gt;Can AI systems be used to foster sustainability practices around mobility, energy, waste, and help better plan development zones and create early warning systems? What systems can be built to encourage citizen participation for solving sustainability problems and increase transparency and accountability of municipal governments?&lt;/p&gt;
&lt;/dl&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/news/technology-foresight-group-tandem-researchs-ai-policy-lab-on-the-theme-ai-and-environment'&gt;https://cis-india.org/internet-governance/news/technology-foresight-group-tandem-researchs-ai-policy-lab-on-the-theme-ai-and-environment&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Admin</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Internet Governance</dc:subject>
    
    
        <dc:subject>Artificial Intelligence</dc:subject>
    
    
        <dc:subject>Privacy</dc:subject>
    

   <dc:date>2018-10-31T01:10:34Z</dc:date>
   <dc:type>News Item</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/news/confidentiality-of-communications-and-privacy-of-data-in-the-digital-age">
    <title>Confidentiality of Communications and Privacy of Data in the Digital Age</title>
    <link>https://cis-india.org/internet-governance/news/confidentiality-of-communications-and-privacy-of-data-in-the-digital-age</link>
    <description>
        &lt;b&gt;On September 25, 2018, Elonnai Hickok participated in a side event Confidentiality of Communications and Privacy of Data in the Digital Age organized by INCLO and Privacy International at the Human Rights Council 39th ordinary session. Elonnai spoke on artificial intelligence and privacy.&lt;/b&gt;
        
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/news/confidentiality-of-communications-and-privacy-of-data-in-the-digital-age'&gt;https://cis-india.org/internet-governance/news/confidentiality-of-communications-and-privacy-of-data-in-the-digital-age&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>praskrishna</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Internet Governance</dc:subject>
    
    
        <dc:subject>Artificial Intelligence</dc:subject>
    
    
        <dc:subject>Privacy</dc:subject>
    

   <dc:date>2018-10-28T06:02:07Z</dc:date>
   <dc:type>News Item</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/blog/oxford-human-rights-hub-arindrajit-basu-october-23-2018-discrimination-in-the-age-of-artificial-intelligence">
    <title>Discrimination in the Age of Artificial Intelligence</title>
    <link>https://cis-india.org/internet-governance/blog/oxford-human-rights-hub-arindrajit-basu-october-23-2018-discrimination-in-the-age-of-artificial-intelligence</link>
    <description>
        &lt;b&gt;The dawn of Artificial Intelligence (AI) has been celebrated by both government and industry across the globe. AI offers the potential to augment many existing bureaucratic processes and improve human capacity, if implemented in accordance with principles of the rule of law and international human rights norms. Unfortunately, AI-powered solutions have often been implemented in ways that have resulted  in the automation, rather than mitigation, of existing societal inequalities.&lt;/b&gt;
        &lt;p&gt;This was originally published by &lt;a class="external-link" href="http://ohrh.law.ox.ac.uk/discrimination-in-the-age-of-artificial-intelligence/"&gt;Oxford Human Rights Hub&lt;/a&gt; on October 23, 2018&lt;/p&gt;
&lt;hr /&gt;
&lt;p style="text-align: justify; "&gt;&lt;img src="https://cis-india.org/home-images/ArtificialIntelligence.jpg/@@images/3b551d39-e419-442c-8c9d-7916a2d39378.jpeg" alt="Artificial Intelligence" class="image-inline" title="Artificial Intelligence" /&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Image Credit: Sarla Catt via Flickr, used under a Creative Commons license available at https://creativecommons.org/licenses/by/2.0/&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;In the international human rights law context, AI solutions pose a  threat to norms which prohibit discrimination. International Human  Rights Law &lt;a href="https://books.google.co.in/books/about/International_Human_Rights_Law.html?id=YkcXAgAAQBAJ&amp;amp;redir_esc=y"&gt;recognizes that discrimination&lt;/a&gt; may take place in two possible ways, directly or indirectly. Direct  discrimination occurs when an individual is treated less favourably than  someone else similarly situated on one of the grounds prohibited in  international law, which, as per the &lt;a href="http://www.equalrightstrust.org/ertdocumentbank/Human%20Rights%20Committee,%20General%20Comment%2018.pdf"&gt;Human Rights Committee,&lt;/a&gt; includes race, colour, sex, language, religion, political or other  opinion, national or social origin, property, birth or other status.  Indirect discrimination occurs when a policy, rule or requirement is  ‘outwardly neutral’ but has a disproportionate impact on certain groups  that are meant to be protected by one of the prohibited grounds of  discrimination. A clear example of indirect discrimination recognized by  the European Court of Human Rights arose in the case of &lt;a href="http://www.errc.org/cikk.php?cikk=3559"&gt;&lt;i&gt;DH&amp;amp;Ors v Czech Republic&lt;/i&gt;&lt;/a&gt;.  The ECtHR struck down an apparently neutral set of statutory rules,  which implemented a set of tests designed to evaluate the intellectual  capability of children but which resulted in an excessively high  proportion of minority Roma children scoring poorly and consequently  being sent to special schools, possibly because the tests were blind to  cultural and linguistic differences. This case acts as a useful analogy  for the potential disparate impacts of AI and should serve as useful  precedent for future litigation against AI-driven solutions.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Indirect discrimination by AI may occur &lt;a href="https://cis-india.org/internet-governance/ai-and-governance-case-study-pdf"&gt;at two stages&lt;/a&gt;. First is the &lt;b&gt;usage of incomplete or inaccurate training data&lt;/b&gt; that results in the algorithm processing data that may not accurately reflect reality. Cathy O’Neil explains this &lt;a href="https://weaponsofmathdestructionbook.com/"&gt;using a simple example&lt;/a&gt;.  There are two types of crimes-those that are ‘reported’ and others that  are only ‘found’ if a policeman is patrolling the area. The first  category includes serious crimes such as murder or rape while the second  includes petty crimes such as vandalism or possession of illicit drugs  in small quantities. Increased police surveillance in areas in US cities  where Black or Hispanic people reside lead to more crimes being ‘found’  there. Thus, data is likely to suggest that these communities commit a  higher proportion of crimes than they actually do – indirect  discrimination that has been empirically been shown through research  published by &lt;a href="https://www.propublica.org/article/bias-in-criminal-risk-scores-is-mathematically-inevitable-researchers-say"&gt;Pro Publica&lt;/a&gt;.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Discrimination may also occur at the stage of &lt;b&gt;data processing&lt;/b&gt;, which is done through a metaphorical &lt;a href="https://www.sentient.ai/blog/understanding-black-box-artificial-intelligence/"&gt;‘black-box’&lt;/a&gt; that accepts inputs and generates outputs without revealing to the  human developer how the data was processed. This conundrum is compounded  by the fact that the algorithms are often utilised to solve an  amorphous problem-which attempts to break down a complex question into a  simple answer. An example is the development of ‘risk profiles’ of  individuals for the  &lt;a href="http://fortune.com/longform/ai-bias-problem/"&gt;determination of insurance premiums.&lt;/a&gt; Data might show that an accident is more likely to take place in inner  cities due  to more densely packed populations in these areas. Racial  and ethnic minorities tend to reside more in these areas, which means  that algorithms could learn that minorities are more likely to get into  accidents, thereby generating an outcome (‘risk profile’) that  indirectly discriminates on grounds of race or ethnicity.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;It would be wrong to ignore discrimination, both direct and indirect,  that occurs as a result of human prejudice. The key difference between  that and discrimination by AI lies in the ability of other individuals  to compel the decision-maker to explain the factors that lead to the  outcome in question and testing its validity against principles of human  rights. The increasing amounts of discretion and, consequently, power  being delegated to autonomous systems mean that principles of  accountability which audit and check indirect discrimination need to be  built into the design of these systems. In the absence of these  principles, we risk surrendering core tenets of human rights law to the  whims of an algorithmically crafted reality.&lt;/p&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/blog/oxford-human-rights-hub-arindrajit-basu-october-23-2018-discrimination-in-the-age-of-artificial-intelligence'&gt;https://cis-india.org/internet-governance/blog/oxford-human-rights-hub-arindrajit-basu-october-23-2018-discrimination-in-the-age-of-artificial-intelligence&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Arindrajit Basu</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Internet Governance</dc:subject>
    
    
        <dc:subject>Artificial Intelligence</dc:subject>
    
    
        <dc:subject>Privacy</dc:subject>
    

   <dc:date>2018-10-26T14:47:57Z</dc:date>
   <dc:type>Blog Entry</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/blog/the-srikrishna-committee-data-protection-bill-and-artificial-intelligence-in-india">
    <title>The Srikrishna Committee Data Protection Bill and Artificial Intelligence in India</title>
    <link>https://cis-india.org/internet-governance/blog/the-srikrishna-committee-data-protection-bill-and-artificial-intelligence-in-india</link>
    <description>
        &lt;b&gt;Artificial Intelligence in many ways is in direct conflict with traditional data protection principles and requirements including consent, purpose limitation, data minimization, retention and deletion, accountability, and transparency.&lt;/b&gt;
        &lt;h3 style="text-align: justify; "&gt;Privacy Considerations in AI&lt;/h3&gt;
&lt;p style="text-align: justify; "&gt;Other related privacy concerns in the context of AI center around re-identification and de-anonymisation, discrimination, unfairness, inaccuracies, bias, opacity, profiling, and misuse of data and embedded power dynamics.&lt;a href="#_ftn1" name="_ftnref1"&gt;&lt;sup&gt;[1]&lt;/sup&gt;&lt;/a&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;The need for large amounts of data to improve accuracy, the ability to process vast amounts of granular data, and the present relationship between explainability and result of AI systems&lt;a href="#_ftn2" name="_ftnref2"&gt;&lt;sup&gt;&lt;sup&gt;[2]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; have raised many concerns on both sides of the fence. On one hand, there is concern that heavy handed or inappropriate regulation will result in stifling innovation. If developers can only use data for pre-defined purpose - the prospects of AI are limited. On the other hand, individuals are concerned that privacy will be significantly undermined in light of AI systems that collect and process data in realtime and at a personal level not previously possible. Chatbots, house assistants, wearable devices, robot caregivers, facial recognition technology etc.  have the ability to collect data from a person at an intimate level. At the sametime, some have argued that AI can work towards protecting privacy by limiting the access that humans working at respective companies have to personal data.&lt;a href="#_ftn3" name="_ftnref3"&gt;&lt;sup&gt;&lt;sup&gt;[3]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;India is embracing AI. Two national roadmaps for AI were released in 2018 respectively by the Ministry of Commerce and Industry and Niti Aayog. Both roadmaps emphasized the importance of addressing privacy concerns in the context of AI and ensuring that a robust privacy legislation is enacted. In August 2018, the Srikrishna Committee released a draft Personal Data Protection Bill 2018 and the associated report that outlines and justifies a framework for privacy in India. As the development and use of AI in India continues to grow, it is important that India simultaneously moves forward with a privacy framework that addresses the privacy dimensions of AI.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;In this article we attempt to analyse if and how the Srikrishna committee draft Bill  and report has addressed AI, contrast this with developments in the EU and the passing of the GDPR, and identify solutions that are being explored towards finding a way to develop AI while upholding and safeguarding privacy.&lt;/p&gt;
&lt;h3 style="text-align: justify; "&gt;The GDPR and Artificial Intelligence&lt;/h3&gt;
&lt;p style="text-align: justify; "&gt;The General Data Protection Regulation became enforceable in May 2018 and establishes a framework for the processing of personal data for individuals within the European Union. The GDPR has been described by the IAPP as taking a ‘risk based’ approach to data protection that pushes data controllers to engage in risk analysis and adopt ‘risk measured responses’.&lt;a href="#_ftn4" name="_ftnref4"&gt;&lt;sup&gt;&lt;sup&gt;[4]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Though the GDPR does not explicitly address artificial intelligence, it does have a number of provisions that address automated decision making and profiling and a number of provisions that will impact companies using artificial intelligence in their business activities. These have been outlined below:&lt;/p&gt;
&lt;ol style="text-align: justify; "&gt;
&lt;li&gt;&lt;b&gt;Data rights: &lt;/b&gt; The GDPR enables individuals with a number of  data rights: the right to be informed, right of access, right to rectification, right to erasure, right to restrict processing, right to data portability, right to object, and rights related to automated decision making including profiling.  The last right - rights related to automated decision making - seeks to address concerns arising out of automated decision making by giving the individual the right to request to not be subject to a decision based solely on automated decision making including profiling if the decision would produce legal effects or similarly significantly affects them.  There are three exceptions to this right - if the automated decision making is:  a. necessary for the performance of a contract, b. authorised by the Union or Member State c. is based on explicit consent.&lt;a href="#_ftn5" name="_ftnref5"&gt;&lt;sup&gt;&lt;sup&gt;[5]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; &lt;/li&gt;
&lt;li&gt;&lt;b&gt;Transparency:&lt;/b&gt; Under Article 14, data controllers must enable the right to opt out of automated decision making by notifying individuals of the existence of automated decision making including profiling and providing meaningful information about the logic involved as well as the potential consequences of such processing.&lt;a href="#_ftn6" name="_ftnref6"&gt;&lt;sup&gt;&lt;sup&gt;[6]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Importantly, this requirement has the potential of ensuring that companies do not operate complete  ‘black box’ algorithms within their business processes.&lt;/li&gt;
&lt;li&gt;&lt;b&gt;Fairness: &lt;/b&gt;The principle of fairness found under Article 5(1) will also apply to the processing of personal data by AI. The principle requires that personal data must be processed in a way to meet the three conditions of lawfully, fairly, and in a transparent manner in relation to the data subject. Recital 71 further clarifies that this will include implementing appropriate mathematical and statistical measures for profiling, ensuring that inaccuracies are corrected, and  ensuring that processing does not result in negative discriminatory results.&lt;a href="#_ftn7" name="_ftnref7"&gt;&lt;sup&gt;&lt;sup&gt;[7]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; &lt;/li&gt;
&lt;li&gt;&lt;b&gt;Purpose Limitation:&lt;/b&gt; The principle of purpose limitation (Article 5(1)(b) requires that personal data must be collected for  specified, explicit, and legitimate purposes and not be further processed in a manner incompatible with those purposes.  Processing for archiving purposes in the public interest, scientific or historical research purposes or statistical purposes are not considered to be incompatible with the initial purposes. It has been noted that it is unclear if research carried out through artificial intelligence would fall under this exception as the GDPR does not define ‘scientific purposes’.&lt;a href="#_ftn8" name="_ftnref8"&gt;&lt;sup&gt;&lt;sup&gt;[8]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; &lt;/li&gt;
&lt;li&gt;&lt;b&gt;Privacy by Design and Default:&lt;/b&gt; Article 25 requires all data controllers to implement technical and organizational measures to meet the requirements of the regulation. This could include techniques like pseudonymisation. Data controllers also are required to implement appropriate technical and organizational measures for ensuring that by default only personal data which are necessary for a specific purpose are processed.&lt;a href="#_ftn9" name="_ftnref9"&gt;&lt;sup&gt;&lt;sup&gt;[9]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;b&gt;Data Protection Impact Assessments:&lt;/b&gt; Article 35 requires data controllers to undertake impact assessments if they are undertaking processing that is likely to result in a high risk to individuals. This includes if the data controller undertakes: systematic and extensive profiling, processes special categories of criminal offence data on a large scale, systematically monitor publicly accessible places on a large scale. In implementation, some jurisdictions like the UK require impact assessments on additional conditions including if the data controller: uses new technologies, uses profiling or special category data to decide on access to services, profile individuals on a large scale, process biometric data, process genetic data, match data or combine datasets from different sources, collect personal data from a source other than the individual without providing them with a privacy notice, track individuals’ location or behaviour, profile children or target marketing or online services at them, process data that might endanger the individual’s physical health or safety in the event of a security breach.&lt;a href="#_ftn10" name="_ftnref10"&gt;&lt;sup&gt;&lt;sup&gt;[10]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;b&gt;Security:&lt;/b&gt; Article 30 requires data controllers to ensure a level of security appropriate to the risk including employing methods like encryption and pseudonymization. &lt;/li&gt;
&lt;/ol&gt;
&lt;h3 style="text-align: justify; "&gt;Srikrishna Committee Bill and AI&lt;/h3&gt;
&lt;p style="text-align: justify; "&gt;The Draft Data Protection Bill and associated report by the Srikrishna Committee was published in August 2018 and recommends a privacy framework for India. The Bill contains a number of provisions that will directly impact data fiduciaries using AI and that try and account for the unintended consequences of emerging technologies like AI. These include:&lt;/p&gt;
&lt;ol style="text-align: justify; "&gt;
&lt;li&gt;&lt;b&gt;Definition of Harm:&lt;/b&gt; The Bill defines harm as including bodily or mental injury, loss, distortion or theft of identity, financial loss or loss of property, loss of reputation or humiliation, loss of employment, any discriminatory treatment, any subjection to blackmail or extortion, any denial or withdrawal of a service, benefit or good resulting from an evaluative decision about the data principal, any restriction placed or suffered directly or indirectly on speech, movement or any other action arising out of a fear of being observed or surveilled, any observation or surveillance that is not reasonably expected by the data principal. The Bill also allows for categories of significant harm to be further defined by the data protection authority.&lt;/li&gt;
&lt;/ol&gt;
&lt;p style="text-align: justify; "&gt;Many of the above are harms that have been associated with artificial intelligence - specifically loss of employment, discriminatory treatment, and denial of service. Enabling the data protection authority to further define categories of significant harm could allow for unexpected harms arising from the use of AI to come under the ambit of the Bill.&lt;/p&gt;
&lt;ol style="text-align: justify; "&gt; &lt;/ol&gt; 
&lt;ul style="text-align: justify; "&gt;
&lt;li&gt;&lt;b&gt;Data Rights:&lt;/b&gt; Like the GDPR, the Bill creates a set of data rights for the individual including the right to confirmation and access, correction, data portability, and right to be forgotten. At the sametime the Bill is intentionally silent on the rights and obligations that have been incorporated into the GDPR that address automated decision making including: The right to object to processing,&lt;a href="#_ftn11" name="_ftnref11"&gt;&lt;sup&gt;&lt;sup&gt;[11]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; the right to opt out of automated decision making&lt;a href="#_ftn12" name="_ftnref12"&gt;&lt;sup&gt;&lt;sup&gt;[12]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;, and the obligation on the data controller to inform the individual about the use of automated decision making and basic information regarding the logic and impact of same.&lt;a href="#_ftn13" name="_ftnref13"&gt;&lt;sup&gt;&lt;sup&gt;[13]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; As justification, in their report the Committee noted the following: The right to restrict processing may be unnecessary in India as it provides only interim remedies around issues such as inaccuracy of data and the same can be achieved by a data principal approaching the DPA or courts for a stay on processing as well as simply withdraw consent. The objective of protecting against discrimination, bias, and opaque decisions that the right to object to automated processing and receive information about the processing of data in the Indian context seeks to fulfill would be better achieved through an accountability framework requiring specific data fiduciaries that will be making evaluative decisions through automated means to set up processes that ‘weed out’ discrimination. At the same time, if discrimination has taken place, individuals can seek remedy through the courts.&lt;/li&gt;
&lt;/ul&gt;
&lt;ol style="text-align: justify; "&gt; &lt;/ol&gt;
&lt;p style="text-align: justify; "&gt;By taking this approach, the Bill creates a framework to address harms arising out of AI, but does not empower the individual to decide how their data is processed and remains silent on the issue of ‘black box’ algorithms.&lt;/p&gt;
&lt;ol style="text-align: justify; "&gt; &lt;/ol&gt; 
&lt;ul style="text-align: justify; "&gt;
&lt;li&gt;&lt;b&gt;Data Quality&lt;/b&gt;: Requires data fiduciaries to ensure that personal data that is processed is complete, accurate, not misleading and updated with respect to the purposes for which it is processed. When taking steps to comply with this - data fiduciaries must take into consideration if the personal data is likely to be used to make a decision about the data principal, if it is likely to be disclosed to other individuals, if the personal data is kept in a form that distinguishes personal data based on facts from personal data based on opinions or personal assessments.&lt;a href="#_ftn14" name="_ftnref14"&gt;&lt;sup&gt;&lt;sup&gt;[14]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/li&gt;
&lt;/ul&gt;
&lt;ol style="text-align: justify; "&gt; &lt;/ol&gt;
&lt;p style="text-align: justify; "&gt;This principle, while not mandating that data fiduciaries take into account considerations such as biases in datasets, could potentially be interpreted by the data protection authority to include in its scope means towards ensuring that data does not contain or result in bias.&lt;/p&gt;
&lt;ol style="text-align: justify; "&gt; &lt;/ol&gt; 
&lt;ul style="text-align: justify; "&gt;
&lt;li&gt;&lt;b&gt;Principle of Privacy by Design:&lt;/b&gt; Requires significant data fiduciaries to have in place a number policies and measures around several aspects of privacy. These include - (a) measures to ensure managerial, organizational, business practices and technical systems are designed in a manner to anticipate, identify, and avoid harm to the data principal (b) the obligations mentioned in Chapter II are embedded in organisational and business practices (c) technology used in the processing of personal data is in accordance with commercially accepted or certified standards (d) legitimate interests of business including any innovation is achieved without compromising privacy interests (e) privacy is protected throughout processing from the point of collection to deletion of personal data (f) processing of personal data is carried out in a transparent manner (g) the interest of the data principal is accounted for at every stage of processing of personal data.&lt;/li&gt;
&lt;/ul&gt;
&lt;ol style="text-align: justify; "&gt; &lt;/ol&gt;
&lt;p style="text-align: justify; "&gt;A number of these (a, d, e, and g)  require that the interest of the data principal is accounted for throughout the processing of personal data, This will be  significant for systems driven by artificial intelligence as a number of the harms that have arisen from the use of AI include discrimination, denial of service, or loss of employment - have been brought under the definition of harm within the Bill. Placing the interest of the data principal first is also important in protecting against unintended consequences or harms that may arise from AI.&lt;a href="#_ftn15" name="_ftnref15"&gt;&lt;sup&gt;&lt;sup&gt;[15]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; If enacted, it will be important to see what policies and measures emerge in the context of AI to comply with this principle. It will also be important to see what commercially accepted or certified standard companies rely on to comply with (c.)&lt;/p&gt;
&lt;ol style="text-align: justify; "&gt; &lt;/ol&gt; 
&lt;ul style="text-align: justify; "&gt;
&lt;li&gt;&lt;b&gt;Data Protection Impact Assessment:&lt;/b&gt; Requires data fiduciaries to undertake a data protection impact assessment when implementing new technologies or large scale profiling or use of sensitive personal data. Such assessments need to include a detailed description of the proposed processing operation, the purpose of the processing and the nature of personal data being processed, an assessment of the potential harm that may be caused to the data principals whose personal data is proposed to be processed, and measures for managing, minimising, mitigating or removing such risk of harm. If the Authority finds that the processing is likely to cause harm to the data principles, it may direct the data fiduciary to undertake processing in certain circumstances or entirely.  This requirement applies to all significant data fiduciaires and all other data fiduciaries as required by the DPA.&lt;a href="#_ftn16" name="_ftnref16"&gt;&lt;sup&gt;&lt;sup&gt;[16]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/li&gt;
&lt;/ul&gt;
&lt;ol style="text-align: justify; "&gt; &lt;/ol&gt;
&lt;p style="text-align: justify; "&gt;This principle will apply to companies implementing AI systems. For AI systems, it will be important to see how much information the DPA will require under the requirement of data fiduciaries providing detailed descriptions of the proposed processing operation and purpose of processing.&lt;/p&gt;
&lt;ol style="text-align: justify; "&gt; &lt;/ol&gt; 
&lt;ul style="text-align: justify; "&gt;
&lt;li&gt;&lt;b&gt;Classification of data fiduciaries as significant data fiduciaries&lt;/b&gt;: The Authority has the ability to notify certain categories of data fiduciaries as significant data fiduciaries based on 1. The volume of personal data processed, 2. The sensitivity of personal data processed, turnover of the data fiduciary, risk of harm resulting from any processing being undertaken by the fiduciary, use of new technologies for processing, and other factor relevant for causing harm to any data principal. If a data fiduciary falls under the ambit of any of these conditions they are required to register with the Authority. All significant data fiduciaries must undertake data protection impact assessments, maintain records as per the bill, under go data audits, and have in place a data protection officer.&lt;/li&gt;
&lt;/ul&gt;
&lt;ol style="text-align: justify; "&gt; &lt;/ol&gt;
&lt;p style="text-align: justify; "&gt;As per this provision - companies deploying artificial intelligence would come under the definition of a significant data fiduciary and be subject to the principles of privacy by design etc. articulated in the chapter. The exception to this will be if the data fiduciary comes under the definition of ‘small entity’ found in section 48.&lt;a href="#_ftn17" name="_ftnref17"&gt;&lt;sup&gt;&lt;sup&gt;[17]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/p&gt;
&lt;ol style="text-align: justify; "&gt; &lt;/ol&gt; 
&lt;ul style="text-align: justify; "&gt;
&lt;li&gt;&lt;b&gt;Restrictions on cross border transfer of personal data: &lt;/b&gt;Requires that all data fiduciaries must store a copy of personal data on a server or data centre located in India and notified categories of critical personal data must be processed in servers located in India.&lt;/li&gt;
&lt;/ul&gt;
&lt;ol style="text-align: justify; "&gt; &lt;/ol&gt;
&lt;p style="text-align: justify; "&gt;It is interesting to note that in the context of cross border sharing of data,  the Bill is creating a new category of data that can be further defined beyond personal and sensitive personal data. For companies implementing artificial intelligence, this provision may prove cumbersome to comply with as many utilize cloud storage and facilities located outside of India for the processing of larger amounts of data.&lt;a href="#_ftn18" name="_ftnref18"&gt;&lt;sup&gt;&lt;sup&gt;[18]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/p&gt;
&lt;ol style="text-align: justify; "&gt; &lt;/ol&gt; 
&lt;ul style="text-align: justify; "&gt;
&lt;li&gt;&lt;b&gt;Powers and functions of the Authority&lt;/b&gt;: The Bill lays down a number of functions of the Authority one being to monitor technological developments and commercial practices that may affect protection of personal data.&lt;/li&gt;
&lt;/ul&gt;
&lt;ol style="text-align: justify; "&gt; &lt;/ol&gt;
&lt;p style="text-align: justify; "&gt;By assumption, this will include monitoring of technological developments in the field of Artificial Intelligence.&lt;a href="#_ftn19" name="_ftnref19"&gt;&lt;sup&gt;&lt;sup&gt;[19]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/p&gt;
&lt;ol style="text-align: justify; "&gt; &lt;/ol&gt; 
&lt;ul style="text-align: justify; "&gt;
&lt;li&gt;&lt;b&gt;Fair and reasonable processing: &lt;/b&gt;Requires that any person processing personal data owes a duty to the data principal to process such personal data in a fair and reasonable manner that respects the privacy of the data principal. In the Srikrishna Committee report, the committee explains that the principle of the fair and reasonable is meant to address 1. Power asymmetries between data subjects and data fiduciaries - recognizing that data fiduciaires have a responsibility to act in the best interest of the data principal 2. Situations where processing may be legal but not necessary fair or in the best interest of the data principal 3. Developing trust between the data principal and the data fiduciary.&lt;a href="#_ftn20" name="_ftnref20"&gt;&lt;sup&gt;&lt;sup&gt;[20]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/li&gt;
&lt;/ul&gt;
&lt;ol style="text-align: justify; "&gt; &lt;/ol&gt;
&lt;p style="text-align: justify; "&gt;This is in contrast to the GDPR which requires processing to simultaneously meet the three conditions of fairness, lawfulness, and transparency.&lt;/p&gt;
&lt;ol style="text-align: justify; "&gt; &lt;/ol&gt; 
&lt;ul style="text-align: justify; "&gt;
&lt;li&gt;&lt;b&gt;Purpose Limitation: &lt;/b&gt;Personal data can only be processed for the purposes specified or any other purpose that the data principal would reasonably expect.&lt;/li&gt;
&lt;/ul&gt;
&lt;ol style="text-align: justify; "&gt; &lt;/ol&gt;
&lt;p style="text-align: justify; "&gt;As a note, the Srikrishna Committee Bill does not include ‘scientific purposes’ as an exception to the principle of purpose limitation as found in the GDPR,&lt;a href="#_ftn21" name="_ftnref21"&gt;&lt;sup&gt;&lt;sup&gt;[21]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; and instead creates an exception for research, archiving, or statistical purposes.&lt;a href="#_ftn22" name="_ftnref22"&gt;&lt;sup&gt;&lt;sup&gt;[22]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; The DPA has the responsibility of developing codes defining research purposes under the act.&lt;a href="#_ftn23" name="_ftnref23"&gt;&lt;sup&gt;&lt;sup&gt;[23]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/p&gt;
&lt;ol style="text-align: justify; "&gt;
&lt;li&gt;&lt;b&gt;Security Safeguards:&lt;/b&gt; Every data fiduciary must implement appropriate security safeguards including the use of methods such as de-identification and encryption, steps to protect the integrity of personal data, and steps necessary to prevent misuse, unauthorised access to, modification, and disclosure or destruction of personal data.&lt;a href="#_ftn24" name="_ftnref24"&gt;&lt;sup&gt;&lt;sup&gt;[24]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/li&gt;
&lt;/ol&gt;
&lt;p style="text-align: justify; "&gt;Unlike the GDPR which explicitly refers to the technique of pseudonymization, the Srikrishna Bill uses the term de-identification.  The Srikrishna Report clarifies that this includes techniques like pseudonymization and masking and further clarifies that because of the  risk of re-identification, de-identified personal data should still receive the same level of protection as personal data. The Bill further gives the DPA the authority to define appropriate levels of anonymization. &lt;a href="#_ftn25" name="_ftnref25"&gt;&lt;sup&gt;&lt;sup&gt;[25]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/p&gt;
&lt;h3 style="text-align: justify; "&gt;Technical perspectives of Privacy and AI&lt;/h3&gt;
&lt;p style="text-align: justify; "&gt;There is an emerging body of work that is looking at solutions to the dilemma of maintaining privacy while employing artificial intelligence and finding ways in which artificial intelligence can support and strengthen privacy. For example, there are AI driven platforms that leverage the technology to help a business to meet regulatory compliance with data protection laws&lt;a href="#_ftn26" name="_ftnref26"&gt;&lt;sup&gt;&lt;sup&gt;[26]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;, as well as research into AI privacy enhancing technologies.&lt;a href="#_ftn27" name="_ftnref27"&gt;&lt;sup&gt;&lt;sup&gt;[27]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Standards setting bodies like IEEE have undertaken work on the ethical considerations in the collection and use of personal data when designing, developing, and/or deploying AI through the standard ‘Ethically Aligned Design’.&lt;a href="#_ftn28" name="_ftnref28"&gt;&lt;sup&gt;&lt;sup&gt;[28]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; . In the article Artificial Intelligence and Privacy by Datatilsynet - the Norwegian Data Protection Authority&lt;a href="#_ftn29" name="_ftnref29"&gt;&lt;sup&gt;&lt;sup&gt;[29]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; break such methods into three categories:&lt;/p&gt;
&lt;ol style="text-align: justify; "&gt;
&lt;li&gt;Techniques for reducing the need for large amounts of training data: Such techniques  can include&lt;/li&gt;
&lt;ol&gt;
&lt;li&gt;&lt;b&gt;Generative adversarial networks (GANs):&lt;/b&gt; GANs are used to create synthetic data and can address the need for large volumes of labelled data without relying on real data containing personal data. GANs could potentially be useful from a research and development perspective in sectors like healthcare where most data would qualify as sensitive personal data.&lt;/li&gt;
&lt;li&gt;&lt;b&gt;Federated Learning:&lt;/b&gt; Federated learning allows for models to be trained and improved on data from a large pool of users without directly using user data. This is achieved by running a centralized model on a client unit and subsequently improved on local data. Changes from the improvements are shared back with the centralized server. An average of the changes from multiple individual client units becomes the basis for improving the  centralized model.&lt;/li&gt;
&lt;li&gt;&lt;b&gt;Matrix Capsules&lt;/b&gt;: Proposed by Google researcher Geoff Hinton, Matrix Capsules improve the accuracy of existing neural networks while requiring less data.&lt;a href="#_ftn30" name="_ftnref30"&gt;&lt;sup&gt;&lt;sup&gt;[30]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/li&gt;
&lt;/ol&gt;
&lt;li&gt;Techniques that uphold data protection without reducing the basic data set&lt;/li&gt;
&lt;ol&gt;
&lt;li&gt;&lt;b&gt;Differential Privacy&lt;/b&gt;: Differential privacy intentionally adds ‘noise’ to data when accessed. This allows for personal data to be accessed without revealing identifying information.&lt;/li&gt;
&lt;li&gt;&lt;b&gt;Homomorphic Encryption:&lt;/b&gt; Homomorphic encryption allows for the processing of data while it is still encrypted. This addresses the need to access and use large amounts of personal data for multiple purposes&lt;/li&gt;
&lt;li&gt;&lt;b&gt;Transfer Learning&lt;/b&gt;: Instead of building a new model, transfer learning builds upon existing models that are applied to new related purposes or tasks. This has the potential to reduce the amount of training data needed. &lt;/li&gt;
&lt;li&gt;&lt;b&gt;RAIRD&lt;/b&gt;: Developed by Statistics Norway and the Norwegian Centre for Research Data, RAIRD is a national research infrastructure that allows for access to large amounts of statistical data for research while managing statistical confidentiality. This is achieved by allowing researchers access to metadata. The metadata is used to build analyses which are then run against detailed data without giving access to actual data.&lt;a href="#_ftn31" name="_ftnref31"&gt;&lt;sup&gt;&lt;sup&gt;[31]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/li&gt;
&lt;/ol&gt;
&lt;li&gt;Techniques to move beyond opaque algorithms&lt;/li&gt;
&lt;ol&gt;
&lt;li&gt;&lt;b&gt;Explainable AI (XAI): &lt;/b&gt;DARPA in collaboration with Oregon State University is researching how to create explainable models and explanation interface while ensuring a high level of learning performance in order to enable individuals to interact with, trust, and manage artificial intelligence.&lt;a href="#_ftn32" name="_ftnref32"&gt;&lt;sup&gt;&lt;sup&gt;[32]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; DARPA identifies a number of entities working on different models and interfaces for analytics and autonomy AI.&lt;a href="#_ftn33" name="_ftnref33"&gt;&lt;sup&gt;&lt;sup&gt;[33]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;b&gt;Local Interpretable Model Agnostic Explanations&lt;/b&gt;: Developed to enable trust between AI models and humans by generating explainers to highlight key aspects that were important to the model and its decision - thus providing insight into the rationale behind a model.&lt;a href="#_ftn34" name="_ftnref34"&gt;&lt;sup&gt;&lt;sup&gt;[34]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/li&gt;
&lt;/ol&gt; &lt;/ol&gt;
&lt;h3 style="text-align: justify; "&gt;Public Sector use of AI and Privacy&lt;/h3&gt;
&lt;p style="text-align: justify; "&gt;The role of AI in public sector decision making has been gradually growing globally across sectors such as law enforcement, education, transportation, judicial decision making and healthcare. In India too, use of automated processing in electronic governance under the Digital India mission, domestic law enforcement agencies monitoring social media content and educational schemes is being discussed and gradually implemented. Much like the potential applications of AI across sub-sectors, the nature of regulatory issues are also diverse.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Aside from the accountability framework discussed in the Srikrishna Committee report, the Puttaswamy judgment also provides a basis for governance of AI with respect to its concerns for privacy, in limited contexts. The sources of right to privacy as articulated in the Puttaswamy judgments included the terms ‘personal liberty’ under Article 21 of the Constitution. In order to fully appreciate how constitutional principles could apply to automated processing in India, we need to look closely at the origins of privacy under liberty. In the famous case of &lt;i&gt;AK Gopalan&lt;/i&gt; there is a protracted discussion on the contents of the rights under Article 21. Amongst the majority opinions itself, the opinion was divided. While Sastri J. and Mukherjea J. took the restrictive view, limiting the protections to bodily restraint and detention, Kania J. and Das J. took a broader view for it to include the right to sleep, play etc. Through &lt;i&gt;RC Cooper&lt;/i&gt;&lt;a href="#_ftn35" name="_ftnref35"&gt;&lt;sup&gt;&lt;sup&gt;[35]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; and &lt;i&gt;Maneka&lt;/i&gt;&lt;a href="#_ftn36" name="_ftnref36"&gt;&lt;sup&gt;&lt;sup&gt;[36]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;, the Supreme Court took steps to reverse the majority opinion in &lt;i&gt;Gopalan&lt;/i&gt; and it was established that the freedoms and rights in Part III could be addressed by more than one provision. The expansion of ‘personal liberty’ began in &lt;i&gt;Kharak Singh&lt;/i&gt; where the unjustified interference with a person’s right to live in his house, was held to be violative of Article 21. The reasoning in &lt;i&gt;Kharak Singh&lt;/i&gt; draws heavily from &lt;i&gt;Munn&lt;/i&gt; v. 
&lt;i&gt;Illinois&lt;/i&gt;&lt;a href="#_ftn37" name="_ftnref37"&gt;&lt;sup&gt;&lt;sup&gt;[37]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; which held life to be “more than mere animal existence.” Curiously, after taking this position &lt;i&gt;Kharak Singh&lt;/i&gt; fails to recognise a fundamental right to privacy (analogous to the Fourth Amendment protection in US) under Article 21. The position taken in &lt;i&gt;Kharak Singh&lt;/i&gt; was to extrapolate the same method of wide interpretation of ‘personal liberty’ as was accorded to ‘life’. &lt;i&gt;Maneka&lt;/i&gt; which evolved the test for enumerated rights within Part III says that the claimed right must be an integral part of, or of the same nature as, the named right. It says that the claimed right must be ‘in reality and substance nothing but an instance of the exercise of the named fundamental right’. The clear reading of privacy into ‘personal liberty’ in this judgment is effectively a correction of the inherent inconsistencies in the positions taken by the majority in Kharak Singh.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;The other significant change in constitutional interpretation that occurred in Maneka was with respect to the phrase ‘procedure established by law’ in Article 21. In Gopalan, the majority held that the phrase ‘procedure established by law’ does not mean procedural due process or natural justice. What this meant was that, once a ‘procedure’ was ‘established by law’, Article 21 could not be said to have been infringed. This position was entirely reversed in Maneka. The ratio in Maneka said that ‘procedure established by law’ must be fair, just and reasonable, and cannot be arbitrary and fanciful. Therefore, any infringement of the right to privacy must be through a law which follows the principles of natural justice, and is not arbitrary or unfair. It follows that any instances of automated processing for public functioning by state actors or others, must meet this standard of ‘fair, just and reasonable’.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;While there is a lot of focus internationally on what ethical AI must be, it is important that when we consider use of AI by the state, we pay heed to the existing constitutional principles which determine how AI must be evaluated against these standards. These principles however extend only to limited circumstances, for protections under Article 21 are not horizontal in nature but only applicable against the state. Whether a party is the state or not is a question that has been considered several times by the Supreme Court and must be determined by functional tests. In our submission to the Justice Srikrishna Committee, we clearly recommended that where automated decision making is used for discharging of public functions, the data protection law must state that such actions are subject to the constitutional standards and are ‘just, fair and reasonable’ and satisfy the tests for both procedural and substantive due process. To a limited extent, the committee seems to have picked up the standards of ‘fair’ and ‘reasonable’ and made it applicable to all forms of processing, whether public or private. It is as yet unclear whether fairness and reasonableness as inserted in the bill would draw from the constitutional standard under Article 21. The report makes a reference to the twin principles of acting in a manner that upholds the best interest of the privacy of the individual, and processing within the reasonable expectations of the individual, which do not seem to cover the fullest essence of the legal standard under Article 21.&lt;/p&gt;
&lt;h3 style="text-align: justify; "&gt;Conclusion&lt;/h3&gt;
&lt;p style="text-align: justify; "&gt;The Srikrishna Committee Bill attempts to create an accountability framework for the use of emerging technologies including AI that is focused on placing the responsibility on companies to prevent harm. Though not as robust as found in the GDPR, the protections have been enabled through requirements such as fair and reasonable processing, ensuring data quality, and implementing principles of privacy by design. At the same time, the Srikrishna Bill does not include provisions that can begin to address the consumer-facing ‘black box’ of AI by ensuring that individuals have information about the potential impact of decisions taken by automated means. In contrast, the GDPR has already taken important steps to tackle this by requiring companies to explain the logic and potential impact of decisions taken by automated means.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Most importantly, the Bill gives the Data Protection Authority the necessary tools to hold companies accountable for the use of AI through the requirements of data protection audits. If enacted, it will have to be seen how these audits and the principle of privacy by design are implemented and enforced in the context of companies using  AI. Though the Bill creates a Data Protection Authority consisting of members that have significant experience in data protection, information technology, data management, data science, cyber and internet laws, and related subjects, these requirements can be further strengthened by having someone from a background of ethics and human rights.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;One of the responsibilities of the DPA under the Srikrishna Bill will be to monitor technological developments and commercial practices that may affect protection of personal data and promote measures and undertake research for innovation in the field of protection of personal data. If enacted, we hope that AI and solutions towards enhancing privacy in the context of AI like described above will be one of these focus areas of the DPA. It will also be important to see how the DPA develops impact assessments related to AI and what tools associated with the principle of Privacy by Design emerge to address AI.&lt;/p&gt;
&lt;hr style="text-align: justify; " /&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref1" name="_ftn1"&gt;&lt;sup&gt;&lt;sup&gt;[1]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://privacyinternational.org/topics/artificial-intelligence&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref2" name="_ftn2"&gt;&lt;sup&gt;&lt;sup&gt;[2]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://www.wired.com/story/our-machines-now-have-knowledge-well-never-understand/&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref3" name="_ftn3"&gt;&lt;sup&gt;&lt;sup&gt;[3]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://iapp.org/news/a/ai-offers-opportunity-to-increase-privacy-for-users/&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref4" name="_ftn4"&gt;&lt;sup&gt;&lt;sup&gt;[4]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://iapp.org/media/pdf/resource_center/GDPR_Study_Maldoff.pdf&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref5" name="_ftn5"&gt;&lt;sup&gt;&lt;sup&gt;[5]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://gdpr-info.eu/art-22-gdpr/&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref6" name="_ftn6"&gt;&lt;sup&gt;&lt;sup&gt;[6]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://gdpr-info.eu/art-14-gdpr/&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref7" name="_ftn7"&gt;&lt;sup&gt;&lt;sup&gt;[7]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://www.datatilsynet.no/globalassets/global/english/ai-and-privacy.pdf&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref8" name="_ftn8"&gt;&lt;sup&gt;&lt;sup&gt;[8]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://www.datatilsynet.no/globalassets/global/english/ai-and-privacy.pdf&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref9" name="_ftn9"&gt;&lt;sup&gt;&lt;sup&gt;[9]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://gdpr-info.eu/art-25-gdpr/&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref10" name="_ftn10"&gt;&lt;sup&gt;&lt;sup&gt;[10]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://ico.org.uk/for-organisations/guide-to-the-general-data-protection-regulation-gdpr/accountability-and-governance/data-protection-impact-assessments/&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref11" name="_ftn11"&gt;&lt;sup&gt;&lt;sup&gt;[11]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://gdpr-info.eu/art-21-gdpr/&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref12" name="_ftn12"&gt;&lt;sup&gt;&lt;sup&gt;[12]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://gdpr-info.eu/art-22-gdpr/&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref13" name="_ftn13"&gt;&lt;sup&gt;&lt;sup&gt;[13]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://gdpr-info.eu/art-14-gdpr/&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref14" name="_ftn14"&gt;&lt;sup&gt;&lt;sup&gt;[14]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;Draft Data Protection Bill 2018 -  Chapter II section 9&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref15" name="_ftn15"&gt;&lt;sup&gt;&lt;sup&gt;[15]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Draft Data Protection Bill 2018 -  Chapter VII section 29&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref16" name="_ftn16"&gt;&lt;sup&gt;&lt;sup&gt;[16]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Draft Data Protection Bill 2018 -  Chapter VII section 33&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref17" name="_ftn17"&gt;&lt;sup&gt;&lt;sup&gt;[17]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Draft Data Protection Bill 2018 -  Chapter VII section 38&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref18" name="_ftn18"&gt;&lt;sup&gt;&lt;sup&gt;[18]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Draft Data Protection Bill 2018 -  Chapter VIII section 40&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref19" name="_ftn19"&gt;&lt;sup&gt;&lt;sup&gt;[19]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Draft Data Protection Bill 2018 -  Chapter X section 60&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref20" name="_ftn20"&gt;&lt;sup&gt;&lt;sup&gt;[20]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Draft Data Protection Bill 2018 -  Chapter II section 4&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref21" name="_ftn21"&gt;&lt;sup&gt;&lt;sup&gt;[21]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Draft Data Protection Bill 2018 - Chapter II section 5&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref22" name="_ftn22"&gt;&lt;sup&gt;&lt;sup&gt;[22]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Draft Data Protection Bill 2018 -  Chapter IX Section 45&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref23" name="_ftn23"&gt;&lt;sup&gt;&lt;sup&gt;[23]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Draft Data Protection Bill 2018 - Chapter XIV section 97&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref24" name="_ftn24"&gt;&lt;sup&gt;&lt;sup&gt;[24]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Draft Data Protection Bill 2018 - Chapter VII section 31&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref25" name="_ftn25"&gt;&lt;sup&gt;&lt;sup&gt;[25]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Srikrishna Committee Report on Data Protection pg. 36 and 37. Available at: http://www.prsindia.org/uploads/media/Data%20Protection/Committee%20Report%20on%20Draft%20Personal%20Data%20Protection%20Bill,%202018.pdf&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref26" name="_ftn26"&gt;&lt;sup&gt;&lt;sup&gt;[26]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://www.ciosummits.com/Online_Assets_DocAuthority_Whitepaper_-_Guide_to_Intelligent_GDPR_Compliance.pdf&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref27" name="_ftn27"&gt;&lt;sup&gt;&lt;sup&gt;[27]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://jolt.law.harvard.edu/assets/articlePDFs/v31/31HarvJLTech217.pdf&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref28" name="_ftn28"&gt;&lt;sup&gt;&lt;sup&gt;[28]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://standards.ieee.org/content/dam/ieee-standards/standards/web/documents/other/ead_personal_data_v2.pdf&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref29" name="_ftn29"&gt;&lt;sup&gt;&lt;sup&gt;[29]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://www.datatilsynet.no/globalassets/global/english/ai-and-privacy.pdf&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref30" name="_ftn30"&gt;&lt;sup&gt;&lt;sup&gt;[30]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://www.artificial-intelligence.blog/news/capsule-networks&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref31" name="_ftn31"&gt;&lt;sup&gt;&lt;sup&gt;[31]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; http://raird.no/about/factsheet.html&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref32" name="_ftn32"&gt;&lt;sup&gt;&lt;sup&gt;[32]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://www.darpa.mil/attachments/XAIProgramUpdate.pdf&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref33" name="_ftn33"&gt;&lt;sup&gt;&lt;sup&gt;[33]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://www.darpa.mil/attachments/XAIProgramUpdate.pdf&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref34" name="_ftn34"&gt;&lt;sup&gt;&lt;sup&gt;[34]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://www.oreilly.com/learning/introduction-to-local-interpretable-model-agnostic-explanations-lime&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref35" name="_ftn35"&gt;&lt;sup&gt;&lt;sup&gt;[35]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; &lt;i&gt;R C Cooper&lt;/i&gt; v. &lt;i&gt;Union of India&lt;/i&gt;, 1970 SCR (3) 530.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref36" name="_ftn36"&gt;&lt;sup&gt;&lt;sup&gt;[36]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; &lt;i&gt;Maneka Gandhi&lt;/i&gt; v. &lt;i&gt;Union of India&lt;/i&gt;, 1978 SCR (2) 621.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref37" name="_ftn37"&gt;&lt;sup&gt;&lt;sup&gt;[37]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; 94 US 113 (1877).&lt;/p&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/blog/the-srikrishna-committee-data-protection-bill-and-artificial-intelligence-in-india'&gt;https://cis-india.org/internet-governance/blog/the-srikrishna-committee-data-protection-bill-and-artificial-intelligence-in-india&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Amber Sinha and Elonnai Hickok</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Internet Governance</dc:subject>
    
    
        <dc:subject>Artificial Intelligence</dc:subject>
    
    
        <dc:subject>Privacy</dc:subject>
    

   <dc:date>2018-09-03T13:29:12Z</dc:date>
   <dc:type>Blog Entry</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/news/unescap-google-ai-meeting">
    <title>UNESCAP Google AI Meeting</title>
    <link>https://cis-india.org/internet-governance/news/unescap-google-ai-meeting</link>
    <description>
        &lt;b&gt;Arindrajit was a panelist at the event on AI in public service delivery hosted by UNESCAP Bangkok on August 29, 2018. The event was co-organized by Economic and Social Commission for Asia and the Pacific and Google.&lt;/b&gt;
        &lt;p style="text-align: justify; "&gt;The discussion centered around the two questions (1) Is AI different from other technological advancements in the past and (2) Recommendations for policy-makers to enhance AI in Public Service Delivery. The other panelists were Dr. Urs Gasser (Berkman), Vidushi Marda (Art.19), Malavika Jayaram (Digital Asia Hub) and Jake Lucchi (Google). The panel was a platform to discuss some of our findings in our case studies on healthcare and agriculture, which we will receive comments on and will get published in November.&lt;br /&gt;&lt;br /&gt;&lt;/p&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/news/unescap-google-ai-meeting'&gt;https://cis-india.org/internet-governance/news/unescap-google-ai-meeting&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Admin</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Internet Governance</dc:subject>
    
    
        <dc:subject>Artificial Intelligence</dc:subject>
    
    
        <dc:subject>Privacy</dc:subject>
    

   <dc:date>2018-09-20T15:47:42Z</dc:date>
   <dc:type>News Item</dc:type>
   </item>




</rdf:RDF>
